/* Rotation code from Tomas Rokicki.  In his words:
**
** "The blitter would be much faster, as it turns out . . .
** I even figured out how to do it, exactly, but
** I don't want to debug the blitter tonight . . ."
*/

/*
 * This code flips an 8 x 8 rectangle to the right 90 degrees.
 * Accompanying it is a set of routines which flip a 128 x 128
 * section of the workbench screen (in the center).
 * Note that the blitter could be up to 4 times faster (according
 * to my preliminary calculations), but it would probably also be
 * more complex, and this should be plenty fast.  Also, for small
 * rectangles like 16 x 16, this will certainly be faster.
 */
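
/*
 * For reference, here is the same operation in portable C -- an
 * illustration added for this writeup, not part of the original
 * distribution.  It rotates one 8 x 8 tile clockwise, assuming the
 * leftmost pixel sits in the most significant bit of each byte, as
 * on Amiga bitplanes; if the assembly below is mapped the other
 * way, swap the two index expressions.
 */
rotate8x8(src, dst)
unsigned char *src, *dst ;
{
    int r, c ;

    for (r = 0 ; r < 8 ; r++) {
        unsigned char out = 0 ;

        for (c = 0 ; c < 8 ; c++)
            /* result pixel (r, c) comes from source pixel (7-c, r) */
            out |= ((src[7 - c] >> (7 - r)) & 1) << (7 - c) ;
        dst[r] = out ;
    }
}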
/*
 * This subroutine takes four arguments, two for the source and two
 * for the destination: a pointer to the first byte to be flipped,
 * and the stride to be added to the address to reach each
 * subsequent byte.
 */
flip(sp, sw, dp, dw)
unsigned char *sp, *dp ;
int sw, dw ;
{
    /* argument offsets from the a5 frame: sp = 8(a5), sw = 12(a5),
       dp = 14(a5), dw = 18(a5) */
#asm
;
; This subroutine actually flips the eight bytes in the lower byte
; of d0-d7, and sticks the results in the upper bytes of d0-d7,
; rotating their previous contents down.  Don't let the length of
; it scare you; you might be able to discern a pattern in the
; instructions.  It's really quite simple.
;
        movem.w reglist,-(sp)   ; save the low words of the registers we use
;
; fetch the eight source bytes into d0-d7, stepping by the
; source stride after each one
;
        move.l  8(a5),a0        ; a0 = sp, the source pointer
        move.w  12(a5),a1       ; a1 = sw, the source stride
        move.b  (a0),d0
        add.w   a1,a0
        move.b  (a0),d1
        add.w   a1,a0
        move.b  (a0),d2
        add.w   a1,a0
        move.b  (a0),d3
        add.w   a1,a0
        move.b  (a0),d4
        add.w   a1,a0
        move.b  (a0),d5
        add.w   a1,a0
        move.b  (a0),d6
        add.w   a1,a0
        move.b  (a0),d7
;
; the flip itself: rotate-with-extend instructions move one bit at
; a time; each group below completes one result byte and ends with
; a roxl.w #8 to slide it into place
;
        roxr.b  #1,d0
        roxr.w  #1,d0
        roxr.w  #1,d1
        roxr.w  #1,d0
        roxr.w  #1,d2
        roxr.w  #1,d0
        roxr.w  #1,d3
        roxr.w  #1,d0
        roxr.w  #1,d4
        roxr.w  #1,d0
        roxr.w  #1,d5
        roxr.w  #1,d0
        roxr.w  #1,d6
        roxr.w  #1,d0
        roxr.w  #1,d7
        roxl.w  #8,d0
;
        roxr.b  #1,d1
        roxr.w  #1,d1
        roxr.w  #1,d2
        roxr.w  #1,d1
        roxr.w  #1,d3
        roxr.w  #1,d1
        roxr.w  #1,d4
        roxr.w  #1,d1
        roxr.w  #1,d5
        roxr.w  #1,d1
        roxr.w  #1,d6
        roxr.w  #1,d1
        roxr.w  #1,d7
        roxl.w  #8,d1
;
        roxr.b  #1,d2
        roxr.w  #1,d2
        roxr.w  #1,d3
        roxr.w  #1,d2
        roxr.w  #1,d4
        roxr.w  #1,d2
        roxr.w  #1,d5
        roxr.w  #1,d2
        roxr.w  #1,d6
        roxr.w  #1,d2
        roxr.w  #1,d7
        roxl.w  #8,d2
;
        roxr.b  #1,d3
        roxr.w  #1,d3
        roxr.w  #1,d4
        roxr.w  #1,d3
        roxr.w  #1,d5
        roxr.w  #1,d3
        roxr.w  #1,d6
        roxr.w  #1,d3
        roxr.w  #1,d7
        roxl.w  #8,d3
;
        roxr.b  #1,d4
        roxr.w  #1,d4
        roxr.w  #1,d5
        roxr.w  #1,d4
        roxr.w  #1,d6
        roxr.w  #1,d4
        roxr.w  #1,d7
        roxl.w  #8,d4
;
        roxr.b  #1,d5
        roxr.w  #1,d5
        roxr.w  #1,d6
        roxr.w  #1,d5
        roxr.w  #1,d7
        roxl.w  #8,d5
;
        roxr.b  #1,d6
        roxr.w  #1,d6
        roxr.w  #1,d7
        roxl.w  #8,d6
;
        roxr.b  #1,d7
        roxl.w  #8,d7
;
; write the eight result bytes to the destination, last register
; first, stepping by the destination stride after each one
;
        move.l  14(a5),a0       ; a0 = dp, the destination pointer
        move.w  18(a5),a1       ; a1 = dw, the destination stride
        move.b  d7,(a0)
        add.w   a1,a0
        move.b  d6,(a0)
        add.w   a1,a0
        move.b  d5,(a0)
        add.w   a1,a0
        move.b  d4,(a0)
        add.w   a1,a0
        move.b  d3,(a0)
        add.w   a1,a0
        move.b  d2,(a0)
        add.w   a1,a0
        move.b  d1,(a0)
        add.w   a1,a0
        move.b  d0,(a0)
        movem.w (sp)+,reglist   ; restore the saved registers
reglist reg     d0/d1/d2/d3/d4/d5/d6/d7/a0/a1
#endasm
}
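
/*
 * A sketch of how flip() might be driven over a larger area -- the
 * accompanying 128 x 128 workbench routines mentioned above are not
 * reproduced here, so this is an added illustration, not the
 * original code.  It rotates a 128 x 128 pixel region of one
 * bitplane clockwise into a second buffer, one 8 x 8 tile at a
 * time; stride is the width of the plane in bytes.  If flip()
 * turns the tiles the other way, mirror the tile mapping to match.
 */
#define TILES 16        /* 128 pixels across = 16 tiles of 8 */

rotate128(src, dst, stride)
unsigned char *src, *dst ;
int stride ;
{
    int tx, ty ;

    for (ty = 0 ; ty < TILES ; ty++)
        for (tx = 0 ; tx < TILES ; tx++)
            /* tile (tx, ty) lands at (TILES-1-ty, tx) after a
               clockwise turn of the whole square */
            flip(src + (ty * 8) * stride + tx, stride,
                 dst + (tx * 8) * stride + (TILES - 1 - ty), stride) ;
}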